# Load libraries, install if needed
library(tidyverse)
#> Warning: package 'tidyr' was built under R version 4.0.5
library(tidylog)
library(RCurl)
library(RColorBrewer)
#> Warning: package 'RColorBrewer' was built under R version 4.0.5
library(sdmTMB)
library(devtools)
library(patchwork)
library(sf)
#> Warning: package 'sf' was built under R version 4.0.5
library(ggeffects)
library(visreg)
library(boot)
# Source utm function
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/lon-lat-utm.R")
# Continuous colors
# options(ggplot2.continuous.colour = "viridis")
#
# # Discrete colors
# scale_colour_discrete <- function(...) {
# scale_colour_brewer(palette = "Dark2")
# }
#
# scale_fill_discrete <- function(...) {
# scale_fill_brewer(palette = "Dark2")
# }
# Normally I would source the map-plot code, but that caused errors in the
# ggplot calls when knitting (not when running the for loop in a fresh session)...
# Change to a URL once we have the final version
#source("/Users/maxlindmark/Dropbox/Max work/R/marine-litter/R/functions/map-plot.R")
# It seems I have to run this inline to avoid errors when knitting (the ggplot calls inside the for loop)
# Specify map ranges
ymin <- 54; ymax <- 60; xmin <- 2; xmax <- 21
map_data <- rnaturalearth::ne_countries(
  scale = "medium",
  returnclass = "sf",
  continent = "europe")
# Crop the polygon for plotting and efficiency:
# st_bbox(map_data) # find the rough coordinates
sf::sf_use_s2(FALSE)
#> Spherical geometry (s2) switched off
swe_coast <- suppressWarnings(suppressMessages(
  st_crop(map_data,
          c(xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax))))
ggplot(swe_coast) +
  geom_sf()
# Transform our map into UTM zone 33 coordinates (EPSG:32633), the projected
# coordinate system we fit the models in:
utm_zone33 <- 32633
swe_coast_proj <- sf::st_transform(swe_coast, crs = utm_zone33)
ggplot(swe_coast_proj) +
  geom_sf()
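For reference, lon/lat point data can be projected into the same CRS with sf (a minimal sketch with made-up coordinates; in this project the actual conversion is done by the sourced lon-lat-utm.R function):
# Hypothetical example: project lon/lat points to UTM zone 33N with sf
pts <- data.frame(lon = c(11.5, 12.0), lat = c(57.5, 58.0))
pts_utm <- pts %>%
  sf::st_as_sf(coords = c("lon", "lat"), crs = 4326) %>% # WGS84
  sf::st_transform(crs = utm_zone33)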
# Define plotting theme for main plot
theme_plot <- function(base_size = 11, base_family = "") {
  theme_light(base_size = base_size, base_family = base_family) +
    theme(
      axis.text = element_text(color = "grey5"),
      legend.position = "bottom",
      legend.key.height = unit(0.2, "cm"),
      legend.margin = margin(0, 0, 0, 0),
      legend.box.margin = margin(-5, -5, -5, -5),
      strip.background = element_rect(fill = "grey95"),
      strip.text = element_text(color = "grey10"),
      strip.text.x = element_text(margin = margin(b = 2, t = 2), color = "grey10", size = 10),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.background = element_blank()
      #, axis.line = element_line(colour = "grey20"),
    )
}
# Make default base map plot
#sf::st_boundary(swe_coast_proj)
# Nudge the projected bounding box slightly for nicer plotting margins
xmin2 <- -61896.44 * 1.005
xmax2 <- 893074.5 * 0.91
ymin2 <- 5983578 * 1.025
ymax2 <- 6691902 * 0.99
plot_map <-
  ggplot(swe_coast_proj) +
  xlim(xmin2, xmax2) +
  ylim(ymin2, ymax2) +
  labs(x = "Longitude", y = "Latitude") +
  geom_sf(size = 0.3) +
  theme_plot() +
  theme(axis.text.x = element_text(angle = 90)) + # apply after theme_plot() so it isn't overridden
  NULL
plot_map_west <-
  ggplot(swe_coast_proj) +
  xlim(200000, xmax2 * 0.45) +
  ylim(ymin2 * 1.015, ymax2 * 0.99) +
  labs(x = "Longitude", y = "Latitude") +
  geom_sf(size = 0.3) +
  theme_plot() +
  theme(axis.text.x = element_text(angle = 90)) +
  NULL
Filter litter categories that have enough data to work with. It is hard to fit models to e.g. glass and metal because they occur so rarely in the data; some years have no observations at all. We could consider pooling years for those categories. See the exploratory model-fitting script (which does not exist yet).
# Read and make the data long so that I can loop over all categories
biom <- read_csv("data/west_coast_litter_biomass.csv") %>%
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) / sd(depth)) %>%
  rename(plastic = plast) %>%
  pivot_longer(c(plastic, sup, fishery),
               names_to = "litter_category",
               values_to = "density") %>%
  filter(litter_category %in% c("fishery", "plastic", "sup"))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl (1): balse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 125 unique values and 0% NA
#>
#> rename: renamed one variable (plastic)
#>
#> pivot_longer: reorganized (plastic, sup, fishery) into (litter_category, density) [was 609x26, now 1827x25]
#>
#> filter: no rows removed
num <- read_csv("data/west_coast_litter_numbers.csv") %>%
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) / sd(depth)) %>%
  rename(plastic = plast) %>%
  pivot_longer(c(plastic, sup, fishery),
               names_to = "litter_category",
               values_to = "density") %>%
  mutate(number = density * swept_area_km2) %>%
  filter(litter_category %in% c("fishery", "plastic", "sup"))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl (1): balse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 125 unique values and 0% NA
#>
#> rename: renamed one variable (plastic)
#>
#> pivot_longer: reorganized (plastic, sup, fishery) into (litter_category, density) [was 609x26, now 1827x25]
#>
#> mutate: new variable 'number' (double) with 125 unique values and 0% NA
#>
#> filter: no rows removed
binom <- read_csv("data/west_coast_litter_biomass.csv") %>%
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(quarter),
         depth_sc = (depth - mean(depth)) / sd(depth)) %>%
  rename(other = diverse,
         natural = naturmaterial,
         glass = glas,
         metal = metall,
         rubber = gummi) %>%
  pivot_longer(c(other, natural, glass, metal, rubber),
               names_to = "litter_category",
               values_to = "density") %>%
  filter(!litter_category %in% c("fishery", "plastic", "sup")) %>%
  mutate(present = ifelse(density == 0, 0, 1))
#> Rows: 609 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, ansse_area, trend_area, var
#> dbl (18): diverse, naturmaterial, plast, metall, glas, gummi, quarter, st_no...
#> lgl (1): balse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 125 unique values and 0% NA
#>
#> rename: renamed 5 variables (other, natural, metal, glass, rubber)
#>
#> pivot_longer: reorganized (other, natural, metal, glass, rubber) into (litter_category, density) [was 609x26, now 3045x23]
#>
#> filter: no rows removed
#>
#> mutate: new variable 'present' (double) with 2 unique values and 0% NA
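As a quick check of which categories have enough presences to support a density model, the proportion of hauls with presences can be tabulated by category and year (a sketch using the binom tibble just created):
# Sketch: proportion of hauls with presences per category and year
binom %>%
  group_by(litter_category, year) %>%
  summarise(prop_present = mean(present), .groups = "drop") %>%
  pivot_wider(names_from = year, values_from = prop_present)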
# Load the prediction grid
pred_grid_west <- read_csv("data/pred_grid_west.csv") %>%
  mutate(year_f = as.factor(year),
         quarter_f = as.factor(1),
         depth_sc = (depth - mean(num$depth)) / sd(num$depth)) %>% # scale with the same mean/sd as the fitting data
  mutate(X = X * 1000,
         Y = Y * 1000) # convert coordinates from km to m
#> Rows: 42580 Columns: 4
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> dbl (4): X, Y, year, depth
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with one unique value and 0% NA
#>
#> new variable 'depth_sc' (double) with 3,207 unique values and 0% NA
#>
#> mutate: changed 42,580 values (100%) of 'X' (0 new NA)
#>
#> changed 42,580 values (100%) of 'Y' (0 new NA)
# hist(pred_grid_west$depth_sc)
# hist(pred_grid_west$depth)
# sd(pred_grid_west$depth)
plot_map_west +
  theme_plot(base_size = 14) +
  geom_point(data = pred_grid_west, aes(X, Y), alpha = 0.5, size = 0.01)
ggsave("figures/supp/pred_grid.png", dpi = 300)
#> Saving 12 x 7.42 in image
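As a sanity check on the grid resolution (a sketch; this assumes a regular grid, and the 2 x 2 km cell area used for the biomass index below would imply 2,000 m spacing after the conversion to meters):
# Sketch: minimum spacing between grid columns, in meters
pred_grid_west %>%
  distinct(X) %>%
  arrange(X) %>%
  summarise(min_spacing_m = min(diff(X)))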
For plastic, sup, and fishery I have enough data to fit spatial models to both biomass density (Tweedie) and numbers (negative binomial; labelled "poisson" in the output files). For the other categories I will only fit presence/absence models, because the data are nearly binary anyway.
# https://haakonbakkagit.github.io/btopic104.html
# https://haakonbakkagit.github.io/btopic114.html
# max.edge <- mean(c(diff(range(num$X)), diff(range(num$Y)))) / 15
# cutoff <- max.edge/5
mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), cutoff = 4)
#> filter: removed 1,218 rows (67%), 609 rows remaining
#mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), type = "kmeans", n_knots = 50)
plot(mesh)
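Mesh resolution can affect the estimated spatial fields, so it may be worth checking how the number of vertices responds to the cutoff (a sketch; cutoff values are in the same units as X and Y):
# Sketch: mesh vertex counts for a few candidate cutoffs
sapply(c(2, 4, 8), function(co) {
  make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), cutoff = co)$mesh$n
})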
dd <- biom %>%
  filter(litter_category == "fishery") %>%
  droplevels()
#> filter: removed 1,218 rows (67%), 609 rows remaining
m0 <- sdmTMB(
  data = dd,
  formula = density ~ 0 + year_f + depth_sc,
  mesh = mesh,
  family = tweedie(),
  spatial = "off",
  time = "year",
  spatiotemporal = "off"
)
dd$m0_resids <- residuals(m0) # randomized quantile residuals
hist(dd$m0_resids)
m1 <- sdmTMB(
  data = dd,
  formula = density ~ 0 + year_f + depth_sc,
  mesh = mesh,
  family = tweedie(),
  spatial = "on",
  time = "year",
  spatiotemporal = "IID"
)
dd$m1_resids <- residuals(m1) # randomized quantile residuals
dd2 <- dd %>%
  pivot_longer(c(m0_resids, m1_resids))
#> pivot_longer: reorganized (m0_resids, m1_resids) into (name, value) [was 609x27, now 1218x27]
ggplot(dd2 %>% filter(value > -3 & value < 3), aes(X, Y, col = value)) +
  scale_colour_gradient2() +
  geom_point(size = 2) +
  facet_grid(name ~ year) +
  theme_minimal() +
  coord_fixed() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank())
#> filter: removed 11 rows (1%), 1,207 rows remaining
# Check spatial autocorrelation
pred_fixed <- predict(m0)$est
s <- simulate(m0, nsim = 500)
r <- DHARMa::createDHARMa(
  simulatedResponse = s,
  observedResponse = dd$density,
  fittedPredictedResponse = pred_fixed
)
plot(r)
DHARMa::testSpatialAutocorrelation(r, x = dd$X, y = dd$Y)
#>
#> DHARMa Moran's I test for spatial autocorrelation
#>
#> data: r
#> observed = -0.0011183, expected = -0.0016447, sd = 0.0184956, p-value =
#> 0.9773
#> alternative hypothesis: Spatial autocorrelation
# In this case there is no clear evidence of residual spatial autocorrelation (Moran's I, p = 0.98)
print(m1)
#> Spatiotemporal model fit by ML ['sdmTMB']
#> Formula: density ~ 0 + year_f + depth_sc
#> Mesh: mesh
#> Time column: year
#> Data: dd
#> Family: tweedie(link = 'log')
#>
#> coef.est coef.se
#> year_f2013 -10.44 1.42
#> year_f2014 -6.51 0.83
#> year_f2015 -4.43 0.53
#> year_f2016 -4.88 0.53
#> year_f2017 -4.37 0.49
#> year_f2018 -4.14 0.49
#> year_f2019 -4.95 0.55
#> year_f2020 -4.33 0.51
#> year_f2021 -5.33 0.53
#> year_f2022 -5.00 0.54
#> depth_sc -0.15 0.21
#>
#> Dispersion parameter: 0.87
#> Tweedie p: 1.57
#> Matern range: 7.45
#> Spatial SD: 1.18
#> Spatiotemporal SD: 4.20
#> ML criterion at convergence: 127.673
#>
#> See ?tidy.sdmTMB to extract these values as a data frame.
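Since both models were fit by maximum likelihood, they can also be compared with AIC (a sketch; m1 adds the spatial and IID spatiotemporal fields to m0):
# Sketch: compare the non-spatial and the spatiotemporal fit
AIC(m0, m1) # lower is better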
data_list_coef <- list()
data_list_pred <- list()
# Year enters as a random effect because some years have no presence observations at all
for (i in unique(binom$litter_category)) {

  dd <- binom %>%
    filter(litter_category == i) %>%
    droplevels()

  m <- sdmTMB(
    data = dd,
    formula = present ~ depth_sc + (1 | year_f),
    offset = log(dd$swept_area_km2), # effort offset on the log scale
    mesh = mesh,
    family = binomial(link = "logit"),
    spatial = "off",
    time = "year",
    spatiotemporal = "off",
    control = sdmTMBcontrol(newton_loops = 1)
  )

  sanity(m)

  tidy(m, conf.int = TRUE)

  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("binom", i, sep = "_"))

  # Plot residuals
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Check for spatial autocorrelation
  pred_fixed <- m$family$linkinv(predict(m)$est)
  s <- simulate(m, nsim = 500)
  r <- DHARMa::createDHARMa(
    simulatedResponse = s,
    observedResponse = dd$present,
    fittedPredictedResponse = pred_fixed
  )
  plot(r)
  DHARMa::testSpatialAutocorrelation(r, x = dd$X, y = dd$Y)

  # Plot conditional/marginal effects
  visreg(m, xvar = "year_f", scale = "response")

  nd <- data.frame(
    depth_sc = mean(dd$depth_sc),
    year = unique(as.integer(as.character(dd$year_f)))) %>%
    mutate(year_f = as.factor(year),
           X = 0,
           Y = 0)

  p <- predict(m, newdata = nd, se_fit = TRUE, re_form = NULL)

  print(ggplot(p, aes(year, inv.logit(est),
                      ymin = inv.logit(est - 1.96 * est_se),
                      ymax = inv.logit(est + 1.96 * est_se))) +
          geom_line() +
          geom_ribbon(alpha = 0.4) +
          coord_cartesian(expand = FALSE))

  data_list_pred[[i]] <- p %>% mutate(model = i)

  # Save model object
  saveRDS(m, paste("output/models/binom_", i, ".rds", sep = ""))

  # Compare the model-based annual means to the raw data means
  print(ggplot(p, aes(year, y = inv.logit(est),
                      ymax = inv.logit(est + 1.96 * est_se),
                      ymin = inv.logit(est - 1.96 * est_se))) +
          geom_line() +
          geom_line(data = dd %>%
                      group_by(year) %>%
                      summarise(present = mean(present)),
                    aes(year, present, color = "Data (mean)"), linetype = 2,
                    inherit.aes = FALSE) + # add data
          geom_ribbon(alpha = 0.2) +
          scale_color_brewer(palette = "Set1", name = "") +
          scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
          theme_plot() +
          xlab("Year") +
          ylab("Mean estimate") +
          NULL)

  ggsave(paste("figures/supp/mean_pred_comp_binom_", i, ".png", sep = ""), dpi = 300)
}
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> new variable 'X' (double) with one unique value and 0% NA
#> new variable 'Y' (double) with one unique value and 0% NA
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> new variable 'X' (double) with one unique value and 0% NA
#> new variable 'Y' (double) with one unique value and 0% NA
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_G` standard error may be large
#> ℹ `ln_tau_G` is an internal parameter affecting `sigma_G`
#> ℹ `sigma_G` is the random intercept standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `sigma_G` is smaller than 0.01
#> ℹ Consider omitting this part of the model
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> new variable 'X' (double) with one unique value and 0% NA
#> new variable 'Y' (double) with one unique value and 0% NA
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> new variable 'X' (double) with one unique value and 0% NA
#> new variable 'Y' (double) with one unique value and 0% NA
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 2,436 rows (80%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_G` standard error may be large
#> ℹ `ln_tau_G` is an internal parameter affecting `sigma_G`
#> ℹ `sigma_G` is the random intercept standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `sigma_G` is smaller than 0.01
#> ℹ Consider omitting this part of the model
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'year_f' (factor) with 10 unique values and 0% NA
#> new variable 'X' (double) with one unique value and 0% NA
#> new variable 'Y' (double) with one unique value and 0% NA
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Save coefficients and predictions as data frames
dat_coef_binom <- dplyr::bind_rows(data_list_coef)
dat_pred_binom <- dplyr::bind_rows(data_list_pred)
write_csv(dat_coef_binom, "output/dat_coef_binom.csv")
write_csv(dat_pred_binom, "output/dat_pred_binom.csv")
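The saved coefficients can then be compared across the presence/absence models, e.g. the depth effect (a sketch using the columns that tidy.sdmTMB returns with conf.int = TRUE):
# Sketch: depth effect with 95% CIs across the binomial models
dat_coef_binom %>%
  filter(term == "depth_sc") %>%
  ggplot(aes(model, estimate, ymin = conf.low, ymax = conf.high)) +
  geom_pointrange() +
  coord_flip() +
  theme_plot()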
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()
for (i in unique(num$litter_category)) {

  dd <- num %>%
    filter(litter_category == i) %>%
    droplevels()

  m <- sdmTMB(
    data = dd,
    formula = number ~ -1 + year_f + depth_sc,
    mesh = mesh,
    offset = log(dd$swept_area_km2), # effort offset on the log scale
    family = nbinom2(link = "log"),
    spatial = "on",
    time = "year",
    spatiotemporal = "off",
    control = sdmTMBcontrol(newton_loops = 1)
  )

  sanity(m)

  tidy(m, conf.int = TRUE)

  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("poisson", i, sep = "_")) # label kept for file consistency; the family is nbinom2

  # Plot residuals
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Plot conditional/marginal effects
  visreg(m, xvar = "year_f", scale = "response")

  # Save model object
  saveRDS(m, paste("output/models/numbers_", i, ".rds", sep = ""))

  # Predict on grid
  pred <- predict(m, newdata = pred_grid_west) %>%
    mutate(model = i)

  data_list_pred[[i]] <- pred

  # Get sims
  ncells <- filter(pred_grid_west, year == max(pred_grid_west$year)) %>% nrow()
  nsim <- 500
  sim <- predict(m, newdata = pred_grid_west, nsim = nsim)

  # Plot CV in space (last year only)
  sim_last <- sim[pred_grid_west$year == max(pred_grid_west$year), ]
  pred_last <- pred[pred$year == max(pred_grid_west$year), ]
  pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)

  print(plot_map_west +
          geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
          scale_fill_viridis_c() +
          geom_sf(size = 0.1) +
          NULL)

  ggsave(paste("figures/supp/cv_numbers_", i, ".png", sep = ""))

  # Get the index (means and CIs) and the full index (all sims)
  index_sim <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim))) %>%
    mutate(model = i) # these area weights give the mean density over the grid

  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
                                   area = rep(1 / ncells, nrow(sim)),
                                   return_sims = TRUE) %>%
    mutate(model = i)

  data_list_sims[[i]] <- index_sim_full

  # See how the mean index compares to the data
  print(ggplot(index_sim, aes(year, y = est, ymin = lwr, ymax = upr)) +
          geom_line() +
          geom_line(data = dd %>%
                      group_by(year) %>%
                      summarise(mean_number = mean(number)),
                    aes(year, mean_number, color = "Data (mean)"), linetype = 2,
                    inherit.aes = FALSE) + # add data
          geom_ribbon(alpha = 0.2) +
          scale_color_brewer(palette = "Set1", name = "") +
          scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
          theme_plot() +
          xlab("Year") +
          ylab("Mean estimate") +
          NULL)

  ggsave(paste("figures/supp/mean_pred_comp_num_", i, ".png", sep = ""), dpi = 300)
}
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> group_by: one grouping variable (year)
#>
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Save predictions and sims as data frames
dat_coef_num <- dplyr::bind_rows(data_list_coef)
dat_pred_num <- dplyr::bind_rows(data_list_pred)
dat_sim_num <- dplyr::bind_rows(data_list_sim)
dat_sims_num <- dplyr::bind_rows(data_list_sims)
write_csv(dat_coef_num, "output/dat_coef_num.csv")
write_csv(dat_pred_num, "output/dat_pred_num.csv")
write_csv(dat_sim_num, "output/dat_sim_num.csv")
write_csv(dat_sims_num, "output/dat_sims_num.csv")
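The saved indices can be plotted together for a quick between-category comparison (a sketch):
# Sketch: number-based indices for all categories in one figure
dat_sim_num %>%
  ggplot(aes(year, est, ymin = lwr, ymax = upr)) +
  geom_ribbon(alpha = 0.2) +
  geom_line() +
  facet_wrap(~model, scales = "free_y") +
  theme_plot()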
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()
for (i in unique(biom$litter_category)) {

  dd <- biom %>%
    filter(litter_category == i) %>%
    droplevels()

  m <- sdmTMB(
    data = dd,
    formula = density ~ 0 + year_f + depth_sc,
    mesh = mesh,
    family = tweedie(),
    spatial = "on",
    time = "year",
    spatiotemporal = "off"
  )

  sanity(m)

  tidy(m, conf.int = TRUE)

  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("tweedie", i, sep = "_"))

  # Plot residuals
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Save model object
  saveRDS(m, paste("output/models/biomass_", i, ".rds", sep = ""))

  # Predict on grid
  pred <- predict(m, newdata = pred_grid_west) %>%
    mutate(model = i)

  data_list_pred[[i]] <- pred

  # Get sims
  nsim <- 500
  sim <- predict(m, newdata = pred_grid_west, nsim = nsim)

  # Plot CV in space (last year only)
  sim_last <- sim[pred_grid_west$year == max(pred_grid_west$year), ]
  pred_last <- pred[pred$year == max(pred_grid_west$year), ]
  pred_last$cv <- round(apply(exp(sim_last), 1, function(x) sd(x) / mean(x)), 2)

  print(plot_map_west +
          geom_raster(data = pred_last, aes(X, Y, fill = cv)) +
          scale_fill_viridis_c() +
          geom_sf(size = 0.1) +
          NULL)

  ggsave(paste("figures/supp/cv_biomass_", i, ".png", sep = ""))

  # Get the index (means and CIs) and the full index (all sims)
  index_sim <- get_index_sims(sim, area = rep(2 * 2, nrow(sim))) %>% # assuming 2 x 2 km grid cells
    mutate(model = i)

  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
                                   area = rep(2 * 2, nrow(sim)),
                                   return_sims = TRUE) %>%
    mutate(model = i)

  data_list_sims[[i]] <- index_sim_full

  # See how the mean index compares to the data
  ncells <- filter(pred_grid_west, year == max(pred_grid_west$year)) %>% nrow()
  index_sim_avg <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim)))

  print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
          geom_line() +
          geom_line(data = dd %>%
                      group_by(year) %>%
                      summarise(mean_density = mean(density)),
                    aes(year, mean_density, color = "Data (mean)"), linetype = 2,
                    inherit.aes = FALSE) + # add data
          #geom_ribbon(alpha = 0.2) +
          scale_color_brewer(palette = "Set1", name = "") +
          scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
          theme_plot() +
          xlab("Year") +
          ylab("Mean biomass estimate (kg)") +
          NULL)

  ggsave(paste("figures/supp/mean_pred_comp_biomass_", i, ".png", sep = ""), dpi = 300)
}
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#>
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 1,218 rows (67%), 609 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> [Stan MCMC progress output omitted: 1 chain, 201 iterations]
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> Saving 12 x 7.42 in image
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 38,322 rows (90%), 4,258 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#> summarise: now 10 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Save predictions and sims as data frames
dat_coef_biom <- dplyr::bind_rows(data_list_coef)
dat_pred_biom <- dplyr::bind_rows(data_list_pred)
dat_sim_biom <- dplyr::bind_rows(data_list_sim)
dat_sims_biom <- dplyr::bind_rows(data_list_sims)
write_csv(dat_coef_biom, "output/dat_coef_biomass.csv")
write_csv(dat_pred_biom, "output/dat_pred_biomass.csv")
write_csv(dat_sim_biom, "output/dat_sim_biomass.csv")
write_csv(dat_sims_biom, "output/dat_sims_biomass.csv")
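As a possible next step, the biomass and numbers indices could be overlaid per category as a consistency check (a sketch; each index is standardized by its own mean since the two responses are on different scales):
# Sketch: compare standardized biomass and numbers indices
bind_rows(
  dat_sim_biom %>% mutate(response = "Biomass"),
  dat_sim_num %>% mutate(response = "Numbers")
) %>%
  group_by(response, model) %>%
  mutate(est_std = est / mean(est)) %>%
  ungroup() %>%
  ggplot(aes(year, est_std, color = response)) +
  geom_line() +
  facet_wrap(~model) +
  scale_color_brewer(palette = "Set1", name = "") +
  theme_plot()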